# Query the Gemini API with a user-supplied prompt and render the reply as Markdown.
# SECURITY FIX: the original line hard-coded an API key directly in source
# (now leaked and must be revoked); read it from the GEMINI_API_KEY
# environment variable instead.
import os

import google.generativeai as genai
from IPython.display import Markdown as md

# Configure the client from the environment rather than a literal key.
genai.configure(api_key=os.environ["GEMINI_API_KEY"])
model = genai.GenerativeModel(model_name="gemini-1.5-flash")

prompt = input()
response = model.generate_content(prompt)
# First candidate, first content part carries the generated text.
reply_text = response.candidates[0].content.parts[0].text
md(reply_text)
# 1 . implementation of various filtering techniques
import cv2
import numpy as np
import matplotlib.pyplot as plt

input_image_path = "Pictures/dog.png"
img = cv2.imread(input_image_path, cv2.IMREAD_GRAYSCALE)
# Robustness fix: cv2.imread returns None on failure; the original crashed
# inside cv2.filter2D with an unhelpful error when the path was wrong.
if img is None:
    print("Error: Could not load the image. Please check the path.")
else:
    # Mean (box) filter: a k x k kernel of equal weights that sum to 1.
    kernel_size = 5
    kernel = np.ones((kernel_size, kernel_size), np.float32) / (kernel_size ** 2)
    filtered_image = cv2.filter2D(img, -1, kernel)

    # Left: the grayscale data rendered with matplotlib's default colormap.
    plt.subplot(1, 3, 1)
    plt.imshow(img)
    plt.title("non grayscale Image")
    plt.axis("off")
    # Middle: the box-filtered (smoothed) image.
    plt.subplot(1, 3, 2)
    plt.imshow(filtered_image, cmap="gray")
    plt.title("Filtered Image")
    plt.axis("off")
    # Right: the unfiltered original in gray for comparison.
    plt.subplot(1, 3, 3)
    plt.imshow(img, cmap="gray")
    plt.title("original Image")
    plt.axis("off")
    plt.show()
# 2 . Implementation of histogram
# Import necessary libraries
import cv2
import numpy as np
import matplotlib.pyplot as plt

# Load the input as a single-channel grayscale array.
input_image_path = "Pictures/dog.png"  # Replace with the path to your image
img = cv2.imread(input_image_path, cv2.IMREAD_GRAYSCALE)

if img is None:
    # imread signals failure by returning None rather than raising.
    print("Error: Could not load the image. Please check the path.")
else:
    # Histogram equalization spreads the intensity distribution to boost contrast.
    equ = cv2.equalizeHist(img)
    # Concatenate original and equalized images horizontally for comparison.
    res = np.hstack((img, equ))
    # Render the side-by-side comparison with matplotlib.
    plt.figure(figsize=(10, 5))
    plt.imshow(res, cmap='gray')
    plt.title("Original Image (Left) vs Histogram Equalized Image (Right)")
    plt.axis('off')
    plt.show()
# 3 . implementation of various segmentation algorithms
import cv2
import matplotlib.pyplot as plt
import numpy as np

# Load the image in grayscale
image = cv2.imread("Pictures/dog.png", cv2.IMREAD_GRAYSCALE)
# Robustness fix: guard against a failed read (imread returns None),
# which previously crashed inside cv2.threshold.
if image is None:
    print("Error: Could not load the image. Please check the path.")
else:
    # Display the original image
    plt.figure()
    plt.imshow(image, cmap='gray')
    plt.title("Original Image")
    plt.axis("off")  # Hide axis
    plt.show()

    # Otsu's method picks the threshold that minimizes intra-class variance;
    # the explicit 0 threshold argument is ignored when THRESH_OTSU is set.
    otsu = cv2.threshold(image, 0, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU)[1]
    plt.figure()
    plt.imshow(otsu, cmap='gray')
    plt.title("Image Segmentation with Otsu Thresholding")
    plt.axis("off")  # Hide axis
    plt.show()
# 4 . program to implement object labelling
# Import necessary libraries
import cv2
import numpy as np
import matplotlib.pyplot as plt
def detect_and_label_objects(image_path, color_ranges, labels):
    """Detect colored objects in an image and draw labeled bounding boxes.

    image_path   : path to the input image file
    color_ranges : list of (lower, upper) HSV bound pairs, one per label
    labels       : text drawn on detections for the matching color range
    """
    image = cv2.imread(image_path)
    if image is None:
        print("Error: Could not load the image. Check the file path.")
        return

    # Work in HSV, where hue isolates color far better than raw BGR values.
    hsv = cv2.cvtColor(image, cv2.COLOR_BGR2HSV)

    for i, (lower, upper) in enumerate(color_ranges):
        # Binary mask: 255 where the pixel's HSV value lies inside this range.
        mask = cv2.inRange(hsv, np.array(lower), np.array(upper))
        # External contours only — one outline per connected blob.
        contours, _ = cv2.findContours(mask, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
        # Box and label every blob of this color.
        for contour in contours:
            x, y, w, h = cv2.boundingRect(contour)
            cv2.rectangle(image, (x, y), (x + w, y + h), (0, 255, 0), 2)
            cv2.putText(image, labels[i], (x, y - 10), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 255, 0), 2)

    # Matplotlib expects RGB while OpenCV stores BGR.
    plt.figure(figsize=(8, 8))
    plt.imshow(cv2.cvtColor(image, cv2.COLOR_BGR2RGB))
    plt.axis('off')
    plt.show()
# Example usage with color ranges and labels
color_ranges = [
    [(0, 100, 100), (10, 255, 255)],     # Red range in HSV
    [(25, 100, 100), (35, 255, 255)],    # Yellow range in HSV
    [(100, 100, 100), (120, 255, 255)]   # Blue range in HSV
]
labels = ["Red", "Yellow", "Blue"]
# Run the function with the image file path.
# Fix: the original literal "D:\linkedin..." contained the invalid escape
# sequence "\l" (a SyntaxWarning, and a future SyntaxError, in modern
# Python); a raw string preserves the exact same bytes safely.
detect_and_label_objects(r"D:\linkedin profile pic 2.jpg", color_ranges, labels)
# 5 . implementation of face recognition system
# Import necessary libraries
import cv2
import matplotlib.pyplot as plt
# Load the pre-trained Haar Cascade for face detection
# Load the pre-trained Haar Cascade for frontal-face detection.
face_cascade = cv2.CascadeClassifier(cv2.data.haarcascades + 'haarcascade_frontalface_default.xml')

# Read the target image from disk (None signals a bad path).
image = cv2.imread('Pictures/211421243033.png')
if image is None:
    print("Error: Image file not found.")
else:
    # Haar cascades operate on single-channel intensity data.
    gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
    # scaleFactor 1.1 shrinks the search window 10% per pass; minNeighbors 4
    # requires overlapping hits before a region counts as a face.
    faces = face_cascade.detectMultiScale(gray, 1.1, 4)
    # Outline every detection in green.
    for (x, y, w, h) in faces:
        cv2.rectangle(image, (x, y), (x + w, y + h), (0, 255, 0), 2)
    # Matplotlib wants RGB ordering, so convert before displaying.
    plt.figure(figsize=(8, 8))
    plt.imshow(cv2.cvtColor(image, cv2.COLOR_BGR2RGB))
    plt.axis('off')
    plt.show()
# 6 . License Plate identification
import cv2
import numpy as np
import matplotlib.pyplot as plt
def detect_license_plate(image_path):
    """Detect license plates with a Haar cascade and display the result.

    image_path: path to the input image. Prints an error and returns early
    if the file cannot be read.
    """
    image = cv2.imread(image_path)
    # Robustness fix: imread returns None on failure; the original crashed
    # inside cvtColor when the path was wrong.
    if image is None:
        print("Error: Could not load the image. Check the file path.")
        return
    # Cascades work on grayscale intensity images.
    gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
    # Pre-trained plate cascade shipped with OpenCV.
    plate_cascade = cv2.CascadeClassifier(cv2.data.haarcascades + 'haarcascade_russian_plate_number.xml')
    # Detect license plates
    plates = plate_cascade.detectMultiScale(gray, scaleFactor=1.1, minNeighbors=5)
    # Draw a green rectangle around each detection.
    for (x, y, w, h) in plates:
        cv2.rectangle(image, (x, y), (x + w, y + h), (0, 255, 0), 2)
    # Convert BGR -> RGB for matplotlib and display.
    image_rgb = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
    plt.imshow(image_rgb)
    plt.axis('off')  # Hide axis
    plt.title("License Plate identification")
    plt.show()

# Example usage
detect_license_plate("Pictures/car1.png")
# 7 . implementation of medical images
import cv2
import matplotlib.pyplot as plt
def main():
    """Segment a medical image: blur, Otsu-threshold, then outline contours."""
    # Load image in grayscale
    image = cv2.imread("Pictures/211421243033.png", cv2.IMREAD_GRAYSCALE)
    # Robustness fix: bail out if the file could not be read instead of
    # crashing inside GaussianBlur with an obscure OpenCV error.
    if image is None:
        print("Error: Could not load the image. Please check the path.")
        return
    # Gaussian blur suppresses noise so Otsu picks a cleaner threshold.
    blurred = cv2.GaussianBlur(image, (5, 5), 0)
    _, thresholded = cv2.threshold(blurred, 0, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU)
    # Trace contours of the binary image and draw them in green on a color copy.
    contours, _ = cv2.findContours(thresholded, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
    contoured_image = cv2.cvtColor(image, cv2.COLOR_GRAY2BGR)
    cv2.drawContours(contoured_image, contours, -1, (0, 255, 0), 2)
    # Show each processing stage side by side.
    titles = ["Original", "Blurred", "Thresholded", "Contours"]
    images = [image, blurred, thresholded, contoured_image]
    for i, img in enumerate(images):
        plt.subplot(1, 4, i + 1)
        plt.imshow(img, cmap='gray')
        plt.title(titles[i])
        plt.axis("off")
    plt.show()

if __name__ == "__main__":
    main()
# 8 . EDGE DETECTION, CORNER DETECTION, LINE DETECTION.
import cv2
import numpy as np
import matplotlib.pyplot as plt
# Load image in grayscale
image_path = "Pictures/car1.png"
image = cv2.imread(image_path, cv2.IMREAD_GRAYSCALE)
if image is None:
    print("Error: Could not load image.")
else:
    # Edge Detection: Canny with hysteresis thresholds 50 (low) / 150 (high).
    edges = cv2.Canny(image, 50, 150)
    # Corner Detection: up to 25 Shi-Tomasi corners, quality 0.01, min 10px apart.
    corners = cv2.goodFeaturesToTrack(image, 25, 0.01, 10)
    if corners is not None:
        # Fix: np.int0 is a deprecated alias for np.intp (NumPy 1.24) and is
        # removed in NumPy 2.0 — convert explicitly with astype instead.
        for x, y in corners.reshape(-1, 2).astype(np.intp):
            cv2.circle(image, (int(x), int(y)), 3, 255, -1)
    # Line Detection: standard Hough transform over the edge map.
    lines = cv2.HoughLines(edges, 1, np.pi / 180, 200)
    if lines is not None:
        for rho, theta in lines[:, 0]:
            # Convert the (rho, theta) polar form to two distant endpoints.
            a, b = np.cos(theta), np.sin(theta)
            x0, y0 = a * rho, b * rho
            x1, y1 = int(x0 + 1000 * (-b)), int(y0 + 1000 * (a))
            x2, y2 = int(x0 - 1000 * (-b)), int(y0 - 1000 * (a))
            # Fix: on a single-channel image only the first element of a color
            # tuple is used, so (0, 0, 255) drew *black* lines; use 255 (white)
            # so detected lines are actually visible.
            cv2.line(image, (x1, y1), (x2, y2), 255, 2)
    # Display Results
    plt.figure(figsize=(10, 5))
    plt.subplot(1, 3, 1), plt.imshow(edges, cmap='gray'), plt.title('Edges')
    plt.subplot(1, 3, 2), plt.imshow(image, cmap='gray'), plt.title('Corners & Lines')
    plt.axis('off')
    plt.show()
# Notebook output (kept for reference, commented so the file parses):
# DeprecationWarning: `np.int0` is a deprecated alias for `np.intp`
# (Deprecated NumPy 1.24), emitted by: for x, y in np.int0(corners).reshape(-1, 2):
# 9 . Face recognition using color model representation
import cv2
import numpy as np
import matplotlib.pyplot as plt
def display_image(image, title="Image"):
    """Helper function to display an image in Jupyter Notebook."""
    plt.figure(figsize=(8, 8))
    # OpenCV decodes to BGR; matplotlib renders RGB.
    rgb = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
    plt.imshow(rgb)
    plt.title(title)
    plt.axis('off')
    plt.show()
def detect_face_using_skin_color(image_path):
    """Segment skin-colored regions of an image via an HSV color-range mask."""
    image = cv2.imread(image_path)
    if image is None:
        print("Error: Could not load the image.")
        return
    # HSV separates hue from brightness, making skin tones easier to bound.
    hsv_image = cv2.cvtColor(image, cv2.COLOR_BGR2HSV)
    # Rough skin-tone range in HSV: hue 0-20, saturation >= 20, value >= 70.
    lower_skin = np.array([0, 20, 70], dtype=np.uint8)
    upper_skin = np.array([20, 255, 255], dtype=np.uint8)
    # Binary mask: 255 wherever the pixel falls inside the skin range.
    mask = cv2.inRange(hsv_image, lower_skin, upper_skin)
    # Keep only the masked pixels of the original image.
    skin_region = cv2.bitwise_and(image, image, mask=mask)
    # Show the input alongside the segmented skin region.
    display_image(image, title="Original Image")
    display_image(skin_region, title="Skin Detected Region")
def recognize_faces_opencv(image_path):
    """Detect faces with OpenCV's pre-trained Haar cascade and display them."""
    cascade_file = cv2.data.haarcascades + 'haarcascade_frontalface_default.xml'
    face_cascade = cv2.CascadeClassifier(cascade_file)
    image = cv2.imread(image_path)
    if image is None:
        print("Error: Could not load the image.")
        return
    # The cascade runs on grayscale input.
    gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
    # minSize filters out spurious tiny detections.
    faces = face_cascade.detectMultiScale(gray, scaleFactor=1.1, minNeighbors=5, minSize=(30, 30))
    # Draw a green box around every detection.
    for (x, y, w, h) in faces:
        cv2.rectangle(image, (x, y), (x + w, y + h), (0, 255, 0), 2)
    display_image(image, title="Detected Faces")
# Example usage: run both detectors on the same photograph.
image_path = "D:/linkedin profile pic 2.jpg"  # Replace with your image path
# Color-model (skin segmentation) approach first ...
detect_face_using_skin_color(image_path)
# ... then the Haar-cascade detector for comparison.
recognize_faces_opencv(image_path)
# 10 . AUTHORIZED FACE RECOGNITION SYSTEM USING FEATURE MATCHING.|
import cv2
import numpy as np
from matplotlib import pyplot as plt
def show_image(image):
    """Render a BGR image with matplotlib under the fixed title "matches"."""
    rgb = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)  # matplotlib expects RGB
    plt.imshow(rgb)
    plt.title("matches")
    plt.axis("off")
    plt.show()
def face_recognition(ref_path, test_path, threshold=50):
    """Compare two face images via ORB feature matching.

    ref_path  : path to the authorized reference image
    test_path : path to the image being checked
    threshold : minimum number of cross-checked matches to grant access
    Returns True when at least `threshold` matches are found, else False.
    """
    ref_img = cv2.imread(ref_path, cv2.IMREAD_GRAYSCALE)
    test_img = cv2.imread(test_path, cv2.IMREAD_GRAYSCALE)
    if ref_img is None or test_img is None:
        print("Error loading images.")
        return False
    # Feature detection
    orb = cv2.ORB_create()
    kp_ref, des_ref = orb.detectAndCompute(ref_img, None)
    kp_test, des_test = orb.detectAndCompute(test_img, None)
    # Robustness fix: detectAndCompute returns None descriptors when no
    # keypoints are found (e.g. a blank/featureless image); BFMatcher.match
    # would crash on None input.
    if des_ref is None or des_test is None:
        print("No features detected in one of the images.")
        return False
    # Matching features (Hamming distance suits ORB's binary descriptors;
    # crossCheck keeps only mutually-best matches).
    matches = cv2.BFMatcher(cv2.NORM_HAMMING, crossCheck=True).match(des_ref, des_test)
    # Fix: sort by descriptor distance so the 20 *best* matches are drawn,
    # not an arbitrary first 20. The access decision is unchanged.
    matches = sorted(matches, key=lambda m: m.distance)
    match_img = cv2.drawMatches(ref_img, kp_ref, test_img, kp_test, matches[:20], None)
    show_image(match_img)
    return len(matches) >= threshold
# Example usage: compare a reference face against a test photograph.
granted = face_recognition("Pictures/211421243033.png", "D:/linkedin profile pic 2.jpg")
print("Access Granted!" if granted else "Access Denied!")
# Notebook output (kept for reference, commented so the file parses):
# Access Granted!
# 11 . IDENTIFYING THE STEREO CORRESPONDENCE OF THE TWO IMAGES
# 12 . HUMAN POSE ESTIMATION
import cv2
import mediapipe as mp
from matplotlib import pyplot as plt
import os
# Initialize MediaPipe Pose model (shared by every call below).
mp_pose = mp.solutions.pose
pose = mp_pose.Pose()
mp_drawing = mp.solutions.drawing_utils

def display_image_with_pose(image_path):
    """Run MediaPipe pose estimation on one image and plot the landmarks."""
    image = cv2.imread(image_path)
    if image is None:
        print("Error: Could not load image. Check the file path.")
        return
    # MediaPipe consumes RGB input, while OpenCV decodes to BGR.
    rgb_image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
    results = pose.process(rgb_image)
    # Overlay the detected skeleton (if any) on the original BGR image.
    if results.pose_landmarks:
        mp_drawing.draw_landmarks(image, results.pose_landmarks, mp_pose.POSE_CONNECTIONS)
    # Plot the annotated image.
    plt.figure(figsize=(8, 8))
    plt.imshow(cv2.cvtColor(image, cv2.COLOR_BGR2RGB))
    plt.axis('off')
    plt.title("Pose Estimation")
    plt.show()
    # The printed "pose" label is simply the file's base name.
    stem = os.path.splitext(os.path.basename(image_path))[0]
    print(f"The pose estimated is '{stem}'")

# Example usage
image_path = "Pictures/running.png"  # Replace with the path to your image file
display_image_with_pose(image_path)
# Notebook output (kept for reference, commented so the file parses):
# UserWarning from google/protobuf/symbol_database.py:55:
#   SymbolDatabase.GetPrototype() is deprecated. Please use
#   message_factory.GetMessageClass() instead; it will be removed soon.
# The pose estimated is 'running'
# 13 . abnormal detection in traffic video surveillance
import cv2
import numpy as np
import matplotlib.pyplot as plt
def display_frame(frame, title="Frame"):
    """Helper function to display a video frame in Jupyter Notebook.

    Handles both color (BGR) and single-channel (grayscale/binary) frames.
    """
    plt.figure(figsize=(10, 6))
    if frame.ndim == 2:
        # Bug fix: cv2.cvtColor(..., COLOR_BGR2RGB) raises on single-channel
        # input, and this helper IS called with the single-channel
        # thresholded-difference frame in detect_abnormal_traffic.
        plt.imshow(frame, cmap='gray')
    else:
        plt.imshow(cv2.cvtColor(frame, cv2.COLOR_BGR2RGB))
    plt.title(title)
    plt.axis('off')
    plt.show()
def detect_abnormal_traffic(video_path):
    """Preview frame-difference motion detection on the first frames of a video."""
    cap = cv2.VideoCapture(video_path)
    ok, prev_frame = cap.read()
    if not ok:
        print("Error: Could not read video.")
        return
    prev_gray = cv2.cvtColor(prev_frame, cv2.COLOR_BGR2GRAY)
    shown = 0  # caps how many frames are displayed for this preview
    while cap.isOpened() and shown < 10:  # preview only the first 10 frames
        ok, frame = cap.read()
        if not ok:
            break
        # Differencing against the previous grayscale frame highlights motion.
        gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
        diff = cv2.absdiff(prev_gray, gray)
        # Binarize: pixels that changed by more than 25 levels become white.
        _, thresh = cv2.threshold(diff, 25, 255, cv2.THRESH_BINARY)
        # Show the raw frame and the change mask.
        display_frame(frame, title="Original Frame")
        display_frame(thresh, title="Thresholded Difference")
        # Slide the window forward one frame.
        prev_gray = gray
        shown += 1
    cap.release()
# Example usage: point this at any traffic clip on disk.
video_path = "Downloads/traffic_video.mp4"  # Replace with your video file path
detect_abnormal_traffic(video_path)
# 14 . HUMAN ACTION RECOGNITION AND OBJECT LOCALIZATIONS
import cv2
import mediapipe as mp
from matplotlib import pyplot as plt
import os
# Initialize MediaPipe Pose model once at module level.
mp_pose = mp.solutions.pose
pose = mp_pose.Pose()
mp_drawing = mp.solutions.drawing_utils

def display_image_with_pose(image_path):
    """Estimate a human pose in a single image and display the landmarks."""
    image = cv2.imread(image_path)
    if image is None:
        print("Error: Could not load image. Check the file path.")
        return
    # MediaPipe wants RGB; OpenCV loads BGR.
    results = pose.process(cv2.cvtColor(image, cv2.COLOR_BGR2RGB))
    if results.pose_landmarks:
        # Draw the landmark skeleton directly onto the BGR image.
        mp_drawing.draw_landmarks(image, results.pose_landmarks, mp_pose.POSE_CONNECTIONS)
    # Display the annotated image (converted back to RGB for matplotlib).
    plt.figure(figsize=(8, 8))
    plt.imshow(cv2.cvtColor(image, cv2.COLOR_BGR2RGB))
    plt.axis('off')
    plt.title("Pose Estimation")
    plt.show()
    # The "action" label is simply the image file's base name.
    base = os.path.splitext(os.path.basename(image_path))[0]
    print(f"The pose estimated is '{base}'")

# Example usage
image_path = "Pictures/running.png"  # Replace with the path to your image file
display_image_with_pose(image_path)
# Notebook output (kept for reference, commented so the file parses):
# The pose estimated is 'running'
# 15 . identifying the road structure
import cv2
import numpy as np
import matplotlib.pyplot as plt
def detect_road_structure(image_path):
    """Extract road-lane edges inside a trapezoidal region of interest.

    image_path: path to the input image; prints an error and returns early
    if the file cannot be read.
    """
    image = cv2.imread(image_path, cv2.IMREAD_GRAYSCALE)
    if image is None:
        print("Error: Could not load image.")
        return
    # Suppress noise before edge detection.
    blurred = cv2.GaussianBlur(image, (5, 5), 0)
    # Canny edge detection with hysteresis thresholds 50/150.
    edges = cv2.Canny(blurred, 50, 150)
    # Trapezoidal ROI covering the lower-central part of the frame, where the
    # road typically appears (float corners truncate to int32).
    height, width = edges.shape
    mask = np.zeros_like(edges)
    polygon = np.array([[
        (0, height),
        (width, height),
        (width * 0.6, height * 0.6),
        (width * 0.4, height * 0.6)
    ]], np.int32)
    # Fill the ROI polygon with white and keep only edges inside it.
    cv2.fillPoly(mask, polygon, 255)
    road_structure = cv2.bitwise_and(edges, mask)
    # Fix: the result is a single-channel binary mask; without cmap='gray'
    # matplotlib rendered it in a false-color map, unlike every other
    # grayscale plot in this file.
    plt.imshow(road_structure, cmap='gray')
    plt.title('Detected Road Structure')
    plt.axis('off')
    plt.show()
# Example usage
detect_road_structure("Pictures/road_img_2.png")
# Show the unprocessed input for comparison.
# Fix: cmap='gray' added so the grayscale image is not rendered in
# matplotlib's default false-color map.
plt.imshow(cv2.imread("Pictures/road_img_2.png", cv2.IMREAD_GRAYSCALE), cmap='gray')
plt.title('Original Image')
plt.axis('off')
plt.show()